Hi, after a long time I am back with a new post. Recently, during the development of a completely dynamic Silverlight framework, I had to create a dynamic layout that could produce a tabular layout like the one created in HTML. E.g., you can see here the HTML for a table and its representation. I had to read this HTML at runtime and create a similar layout in Silverlight at run time. That layout was actually the layout of the main shell of the framework. I put the HTML part somewhere in the ClientBin and instructed the code to read it using a WebClient. Once the XML was read, it was sent to be processed by the service class. Entity: public enum VAlignment { bottom = 1, middle = 2, top = 3, stretch = 4 }
// Horizontal alignment values parsed from the HTML "align" attribute.
public enum HAlignment { center = 1, left = 2, right = 3, stretch = 4 }
// Size unit for a parsed width/height ("pixcel" [sic] = px, percent = %).
public enum Unit { pixcel = 1, percent = 2 }
// Base entity shared by Table, TypeRow and TypeData. Holds the values parsed
// from the HTML-like layout XML: an id, width/height (raw string + parsed
// number + unit) and horizontal/vertical alignment.
public class TableBase
{
    private HAlignment _HAlign;
    private VAlignment _VAlign;
    private string _Height;
    private double _height;
    private string _Width;
    private double _width;

    // Element id (the "id" attribute from the XML).
    public string Name { get; set; }

    // Accepts "120" (pixels), "50%" (percent) or "90px" (pixels). The getter
    // returns the raw string that was assigned. When the string cannot be
    // parsed, the numeric value becomes 0 and the unit keeps its previous
    // value (same as the original behaviour).
    public string SetWidth
    {
        set
        {
            _Width = value;
            Unit? unit = ParseDimension(value, out _width);
            if (unit.HasValue) WidthUnit = unit.Value;
        }
        get { return _Width; }
    }

    // Parsed numeric width (0 when SetWidth could not be parsed).
    public double GetWidth { get { return _width; } }

    // Same contract as SetWidth, for the height attribute.
    public string SetHeight
    {
        set
        {
            _Height = value;
            Unit? unit = ParseDimension(value, out _height);
            if (unit.HasValue) HeightUnit = unit.Value;
        }
        get { return _Height; }
    }

    // Parsed numeric height (0 when SetHeight could not be parsed).
    public double GetHeight { get { return _height; } }

    public Unit WidthUnit { get; private set; }
    public Unit HeightUnit { get; private set; }

    // Maps the HTML "align" attribute; unknown/empty values default to center.
    public string SetHAlign
    {
        set
        {
            switch ((value ?? "").ToLower())
            {
                case "left": _HAlign = HAlignment.left; break;
                case "right": _HAlign = HAlignment.right; break;
                case "stretch": _HAlign = HAlignment.stretch; break;
                default: _HAlign = HAlignment.center; break;
            }
        }
    }

    public HAlignment GetHAlign { get { return _HAlign; } }

    // Maps the HTML "valign" attribute; unknown/empty values default to middle.
    public string SetVAlign
    {
        set
        {
            switch ((value ?? "").ToLower())
            {
                case "bottom": _VAlign = VAlignment.bottom; break;
                case "top": _VAlign = VAlignment.top; break;
                case "stretch": _VAlign = VAlignment.stretch; break;
                default: _VAlign = VAlignment.middle; break;
            }
        }
    }

    public VAlignment GetVAlign { get { return _VAlign; } }

    // Parses a dimension string: bare number -> pixels, "n%" -> percent,
    // "npx" -> pixels. Returns null (with number = 0) when unrecognised.
    // Fixes the original px branch, whose Substring(len - 2, len) call threw
    // ArgumentOutOfRangeException (start + length exceeds the string length)
    // for any real value such as "90px".
    private static Unit? ParseDimension(string text, out double number)
    {
        if (double.TryParse(text, out number)) return Unit.pixcel;

        string trimmed = (text ?? "").Trim();
        if (trimmed.IndexOf('%') >= 0)
        {
            if (double.TryParse(trimmed.Split('%')[0], out number)) return Unit.percent;
            return null;
        }
        if (trimmed.Length > 2 && trimmed.EndsWith("px", StringComparison.OrdinalIgnoreCase))
        {
            if (double.TryParse(trimmed.Substring(0, trimmed.Length - 2), out number)) return Unit.pixcel;
        }
        return null;
    }
}
// Table entity: a <table> element together with its parsed <tr> rows.
public class Table : TableBase { public List<TypeRow> TR { get; set; } } --------------------------------------------------------------------------------------------- Helper Class: private static XElement XMLStructure() { return XElement.Load(@"Data/LayoutStructure.xml"); }
// Reads the layout XML and returns every top-level <table> element as a
// Table entity (including its rows and cells, parsed recursively).
public static IEnumerable<Table> GetTables()
{
    XElement documentRoot = XMLStructure();
    List<Table> tblList = new List<Table>();
    // Iterate element children directly instead of the original
    // XElement.Parse(node.ToString()) round-trip, which was wasteful and
    // threw on text/comment child nodes.
    foreach (XElement childElement in documentRoot.Elements())
    {
        if (childElement.Name.LocalName != "table") continue;
        Table tbl = new Table();
        // Missing attributes map to "" (the explicit string cast yields null).
        tbl.Name = (string)childElement.Attribute("id") ?? "";
        tbl.SetWidth = (string)childElement.Attribute("width") ?? "";
        tbl.SetHeight = (string)childElement.Attribute("height") ?? "";
        tbl.SetHAlign = (string)childElement.Attribute("align") ?? "";
        tbl.SetVAlign = (string)childElement.Attribute("valign") ?? "";
        tbl.TR = getTR(childElement);
        tblList.Add(tbl);
    }
    return tblList;
}
// Parses the <tr> children of a table element into TypeRow entities,
// including each row's <td> cells.
private static List<TypeRow> getTR(XElement doc)
{
    List<TypeRow> trList = new List<TypeRow>();
    // Iterate element children directly instead of re-parsing each node's
    // string form; this also safely skips text/comment nodes.
    foreach (XElement childElement in doc.Elements())
    {
        if (childElement.Name.LocalName != "tr") continue;
        TypeRow tr = new TypeRow();
        // Missing attributes map to "" (the explicit string cast yields null).
        tr.Name = (string)childElement.Attribute("id") ?? "";
        tr.SetWidth = (string)childElement.Attribute("width") ?? "";
        tr.SetHeight = (string)childElement.Attribute("height") ?? "";
        tr.SetHAlign = (string)childElement.Attribute("align") ?? "";
        tr.SetVAlign = (string)childElement.Attribute("valign") ?? "";
        tr.TD = getTD(childElement);
        trList.Add(tr);
    }
    return trList;
}
// Parses the <td> children of a row element into TypeData entities,
// including any nested <table> elements inside each cell.
private static List<TypeData> getTD(XElement doc)
{
    List<TypeData> tdList = new List<TypeData>();
    // Iterate element children directly instead of re-parsing each node's
    // string form; this also safely skips text/comment nodes.
    foreach (XElement childElement in doc.Elements())
    {
        if (childElement.Name.LocalName != "td") continue;
        TypeData td = new TypeData();
        // Missing attributes map to "" (the explicit string cast yields null).
        td.Name = (string)childElement.Attribute("id") ?? "";
        td.SetWidth = (string)childElement.Attribute("width") ?? "";
        td.SetHeight = (string)childElement.Attribute("height") ?? "";
        td.SetHAlign = (string)childElement.Attribute("align") ?? "";
        td.SetVAlign = (string)childElement.Attribute("valign") ?? "";
        td.TABLE = getTable(childElement);
        tdList.Add(td);
    }
    return tdList;
}
// Parses nested <table> elements of a <td>. Mirrors GetTables but scoped to
// one parent element.
private static List<Table> getTable(XElement obj)
{
    List<Table> tblList = new List<Table>();
    // Iterate element children directly instead of re-parsing each node's
    // string form; this also safely skips text/comment nodes.
    foreach (XElement childElement in obj.Elements())
    {
        if (childElement.Name.LocalName != "table") continue;
        Table tbl = new Table();
        // Missing attributes map to "" (the explicit string cast yields null).
        tbl.Name = (string)childElement.Attribute("id") ?? "";
        tbl.SetWidth = (string)childElement.Attribute("width") ?? "";
        tbl.SetHeight = (string)childElement.Attribute("height") ?? "";
        tbl.SetHAlign = (string)childElement.Attribute("align") ?? "";
        tbl.SetVAlign = (string)childElement.Attribute("valign") ?? "";
        tbl.TR = getTR(childElement);
        tblList.Add(tbl);
    }
    return tblList;
}
------------------------------------------------------------------------------------------ Service Class:
// Builds the root UI: reads the layout tables from the XML and turns the
// first one into a Grid sized to the configured layout dimensions.
public UIElement GetUI()
{
    GridList = new List<Grid>();
    IEnumerable<Table> TableCollection = XmlHelper.GetTables();
    Grid grd = new Grid();
    grd.SetValue(Grid.RowProperty, 0);
    grd.SetValue(Grid.ColumnProperty, 0);
    grd.Width = LayoutWidth;
    grd.Height = LayoutHeight;
    if (TableCollection.Any())   // Any() instead of Count() > 0: no full enumeration
    {
        grd = CreateTable(TableCollection.First(), grd);
        SetPostProperties(grd);  // fill in sizes the XML left unspecified
    }
    return grd;
}
// Recursively walks the generated Grid tree and assigns an explicit
// Width/Height to every child grid that still has an auto (NaN) size: the
// parent's leftover space (its size minus the sum of explicitly-sized
// children) is split evenly among the unsized children.
private void SetPostProperties(Grid grd) { double widthSum=0, heightSum=0, cntWidth=0, cntHeight=0;
// Sum explicit child widths and count the children with no explicit width.
// NOTE(review): the Children.Count != RowDefinitions.Count guard presumably
// skips grids laid out one-child-per-row (where widths need no distribution);
// confirm the intent — it is not obvious from the code alone.
if (grd.Children.Count != grd.RowDefinitions.Count) { foreach (UIElement ele in grd.Children) { if (ele.GetType().Name == "Grid") { if (!double.IsNaN(((Grid)ele).Width)) widthSum += ((Grid)ele).Width; else cntWidth += 1;
} } }
// Same pass for heights, guarded by the column count instead.
if (grd.Children.Count != grd.ColumnDefinitions.Count) { foreach (UIElement ele in grd.Children) { if (ele.GetType().Name == "Grid") {
if (!double.IsNaN(((Grid)ele).Height)) heightSum += ((Grid)ele).Height; else cntHeight += 1; } } }
// A zero divisor would yield NaN/Infinity below, so clamp the counts to 1.
if(cntWidth==0) cntWidth=1; if(cntHeight==0) cntHeight=1;
// Distribute the leftover parent space to each unsized child, then recurse.
foreach (UIElement ele in grd.Children) { if (ele.GetType().Name == "Grid") { if (double.IsNaN(((Grid)ele).Width) && cntWidth>0) ((Grid)ele).Width = (((Grid)((Grid)ele).Parent).Width - widthSum) / cntWidth; if (double.IsNaN(((Grid)ele).Height) && cntHeight>0) ((Grid)ele).Height = (((Grid)((Grid)ele).Parent).Height - heightSum) / cntHeight;
SetPostProperties((Grid)ele); } } }
#endregion
// Turns one parsed <table> entity into a Grid: one RowDefinition per <tr>,
// one child Grid per row, and inside it one grid per <td>. Nested <table>
// elements recurse. Every created grid is also registered in GridList.
private Grid CreateTable(Table tab, Grid grd) { grd = SetGridProperty(tab, grd);
// Row heights: unit 0 (nothing parsed) means "*" (share leftover space).
// NOTE(review): GridLength(tr.GetHeight) treats a percent value as pixels
// here — percent sizing appears to be resolved elsewhere (SetGridProperty /
// SetPostProperties); confirm.
foreach (TypeRow tr in tab.TR) { RowDefinition rd = new RowDefinition(); if (tr.HeightUnit == 0) rd.Height = new GridLength(1.0,GridUnitType.Star); else rd.Height = new GridLength(tr.GetHeight);
grd.RowDefinitions.Add(rd); }
// One child grid per row, placed into its row slot.
int i = 0; foreach (TypeRow tr in tab.TR) { Grid grdchild = new Grid(); grdchild = SetGridProperty(tr, grdchild); grdchild.SetValue(Grid.RowProperty, i);
// Column definitions live on the per-row grid, so columns are NOT shared
// across rows. NOTE(review): confirm unaligned columns are intended.
foreach (TypeData td in tr.TD) { ColumnDefinition cd = new ColumnDefinition(); if (td.WidthUnit == 0) cd.Width = new GridLength(1.0, GridUnitType.Star); else cd.Width = new GridLength(td.GetWidth); grdchild.ColumnDefinitions.Add(cd); }
// One grandchild grid per cell; a nested table becomes a recursive call.
int j = 0; foreach (TypeData td in tr.TD) { Grid grdgrandchild = new Grid(); grdgrandchild = SetGridProperty(td, grdgrandchild); grdgrandchild.SetValue(Grid.ColumnProperty, j);
// Only the first nested table of a cell is rendered (td.TABLE.First()).
if (td.TABLE.Count > 0) { Grid paramGrid = new Grid(); paramGrid.SetValue(Grid.RowProperty, i); paramGrid.SetValue(Grid.ColumnProperty, j); Table childtab = td.TABLE.First(); grdgrandchild.Children.Add(CreateTable(childtab, paramGrid)); //SetGridProperty(childtab, paramGrid); } j++;
GridList.Add(grdgrandchild); grdchild.Children.Add(grdgrandchild); //SetGridProperty(td, grdgrandchild); } GridList.Add(grdchild); grd.Children.Add(grdchild); //SetGridProperty(tr, grdchild); i++; } GridList.Add(grd); return grd; }
// Copies the parsed attributes of a table/tr/td entity (name, size,
// alignment) onto the given Grid and returns that same Grid.
private Grid SetGridProperty(TableBase tab, Grid grd)
{
    //grd.ShowGridLines = true;
    //grd.Background = new SolidColorBrush(Colors.Orange);
    string name = tab.Name;
    if (name != "")
        grd.Name = name;

    // Hard-coded shell hook: the content grid gets a collapsed first column.
    if (name == "ContentGrid")
    {
        grd.ColumnDefinitions.Add(new ColumnDefinition() { Width = new GridLength(0.0) });
        grd.ColumnDefinitions.Add(new ColumnDefinition());
    }

    // Inherit the parent's measured size first; an explicit width/height
    // from the XML (handled below) overwrites it.
    if (grd.Parent != null && grd.Parent.GetType().Name == "Grid")
    {
        Grid parentGrid = (Grid)grd.Parent;
        grd.Width = parentGrid.ActualWidth;
        grd.Height = parentGrid.ActualHeight;
    }

    // An unset unit (enum value 0) leaves the size untouched.
    switch (tab.WidthUnit)
    {
        case Unit.percent:
            // NOTE(review): ActualWidth is 0 until the grid has been measured,
            // so percent sizes resolve to 0 when this runs pre-layout — confirm.
            grd.Width = (grd.ActualWidth * tab.GetWidth) / 100;
            break;
        case Unit.pixcel:
            grd.Width = tab.GetWidth;
            break;
    }

    switch (tab.HeightUnit)
    {
        case Unit.percent:
            grd.Height = (grd.ActualHeight * tab.GetHeight) / 100;
            break;
        case Unit.pixcel:
            grd.Height = tab.GetHeight;
            break;
    }

    // Map the entity alignments onto the Silverlight alignment enums; an
    // unset value (enum 0) leaves the grid's default alignment in place.
    switch (tab.GetVAlign)
    {
        case VAlignment.bottom: grd.VerticalAlignment = VerticalAlignment.Bottom; break;
        case VAlignment.middle: grd.VerticalAlignment = VerticalAlignment.Center; break;
        case VAlignment.stretch: grd.VerticalAlignment = VerticalAlignment.Stretch; break;
        case VAlignment.top: grd.VerticalAlignment = VerticalAlignment.Top; break;
    }

    switch (tab.GetHAlign)
    {
        case HAlignment.center: grd.HorizontalAlignment = HorizontalAlignment.Center; break;
        case HAlignment.left: grd.HorizontalAlignment = HorizontalAlignment.Left; break;
        case HAlignment.right: grd.HorizontalAlignment = HorizontalAlignment.Right; break;
        case HAlignment.stretch: grd.HorizontalAlignment = HorizontalAlignment.Stretch; break;
    }

    return grd;
}
During the weekend I came across Dino Esposito’s blog and found a few interesting lines. Here it goes… “It seems that the bar of software preview has been moved one more tick down. There was a time when any software was top secret until publicly released. Next, we assisted to pre-release announcements and demos. Next, we were allowed to play ourselves with beta versions for longer and longer times. And our feedback was first appreciated and then requested. Next, it was the season of CTPs, progressive alpha versions that precede the beta stage–the teen-age of software. With the MVC framework we reached the notable point of publicly discussing a piece of software that is not even a CTP. I wonder what would be the next step. Discussing ideas, I bet. And after that?” It made me curious enough about the MVC Framework. Maybe not because I am very aggressive about new things, but at least because you must stay updated every minute in this industry or just get ready to die — and that I don’t want… There is not enough material available on this as it is in its infancy, but I found some interesting things that I want to share with you… The first thing is ScottGu’s presentation on this at Alt.Net. See video. And next is ScottGu’s blog on the same. These two sources are first-hand, and apart from them I went through several blogs from attendees of the Alt.Net presentation. I tried to assemble all the main content and some of my findings in this article and hope it might be helpful. Starting with some issues… Being an ASP.NET developer I often run into a problem – how to write a unit test in a tightly coupled system. You may ask why I would develop a tightly coupled application if I want to follow test-driven design. Then what can I do… I can use the MVC (Model-View-Controller) pattern to develop my application. In fact, ASP.NET itself supports MVC through the code-behind model, where you can regard each aspx.vb class as a controller and the aspx as a view.
I will brief about the MVC as we will go a little further. For now I am stating another problem…ASP.Net is based on two primary things…Postback and Viewstate. And both of them have several problems associated with them. Trust me. Just do a googling on the phrase “Viewstate Problem” and you will find millions of entries. So as the name suggests, MVC Framework is primarily based on and tried to remove the complexity of implementation of MVC design pattern. What is MVC pattern is all about?Model-View-Controller (MVC) PatternThe Model-View-Controller (MVC) pattern separates the modeling of the domain, the presentation, and the actions based on user input into three separate classes. Model. The model manages the behavior and data of the application domain, responds to requests for information about its state (usually from the view), and responds to instructions to change state (usually from the controller). View. The view manages the display of information. Controller. The controller interprets the mouse and keyboard inputs from the user, informing the model and/or the view to change as appropriate. It is important to note that both the view and the controller depend on the model. However, the model depends on neither the view nor the controller. This is one the key benefits of the separation. This separation allows the model to be built and tested independent of the visual presentation. The separation between view and controller is secondary in many rich-client applications, and, in fact, many user interface frameworks implement the roles as one object. In Web applications, on the other hand, the separation between view (the browser) and controller (the server-side components handling the HTTP request) is very well defined. Read More to learn how to implement MVC pattern. Now, in ScottGu’s word some of the key points of MVC Framework: It enables clean separation of concerns, testability, and TDD(Test Driven Design) by default. 
All core contracts within the MVC framework are interface based and easily mockable (it includes interface based IHttpRequest/IHttpResponse intrinsics). You can unit test the application without having to run the Controllers within an ASP.NET process (making unit testing fast). You can use any unit testing framework you want to-do this testing (including NUnit, MBUnit, MS Test, etc).
It is highly extensible and pluggable. Everything in the MVC framework is designed so that it can be easily replaced/customized (for example: you can optionally plug-in your own view engine, routing policy, parameter serialization, etc). It also supports using existing dependency injection and IOC container models (Windsor, Spring.Net, NHibernate, etc).
It includes a very powerful URL mapping component that enables you to build applications with clean URLs. URLs do not need to have extensions within them, and are designed to easily support SEO and REST-friendly naming patterns. For example, I could easily map the /products/edit/4 URL to the “Edit” action of the ProductsController class in my project above, or map the /Blogs/scottgu/10-10-2007/SomeTopic/ URL to a “DisplayPost” action of a BlogEngineController class.
The MVC framework supports using the existing ASP.NET .ASPX, .ASCX, and .Master markup files as “view templates” (meaning you can easily use existing ASP.NET features like nested master pages, <%= %> snippets, declarative server controls, templates, data-binding, localization, etc). It does not, however, use the existing post-back model for interactions back to the server. Instead, you’ll route all end-user interactions to a Controller class instead - which helps ensure clean separation of concerns and testability (it also means no viewstate or page lifecycle with MVC based views).
The ASP.NET MVC framework fully supports existing ASP.NET features like forms/windows authentication, URL authorization, membership/roles, output and data caching, session/profile state management, health monitoring, configuration system, the provider architecture, etc.
There are some good implementation examples available on this framework: ASP.NET MVC Framework by ScottGu RSS Feed with the new ASP.NET MVC Framework by Brad Abrams Hanselman’s demo at Alt.Net conference. I guess, now you are much familiar with MVC framework, but this is the time to elaborate few of the points that ScottGu has pointed out in his blog. The first point is Dependency Injection. What is this??? Now a day the focus is on reusing existing components and wiring together disparate components to form a cohesive architecture. But this wiring can quickly become a scary task because as application size and complexity increase, so do dependencies. One way to mitigate the proliferation of dependencies is by using Dependency Injection (DI), which allows you to inject objects into a class, rather than relying on the class to create the object itself. Wanna go in detail? Wait for my next post in which I am planning to elaborate DI and Spring.Net and describe about limitations of Factory, Abstract Factory, Builder and Container.
The next thing that ScottGu has talked about is REST (Representational State Transfer)… Well, REST is an architectural pattern that defines how network resources should be defined and addressed in order to gain shorter response times, clear separation of concerns between the front-end and back-end of a networked system. REST is based on three following principles:
- An application expresses its state and implements its functionality by acting on logical resources.
- Each resource is addressed using a specific URL syntax.
- All addressable resources feature a contracted set of operations.
As you can see, the MVC Framework fulfills it entirely.
The MVC Framework doesn’t support classic postbacks and viewstate and doesn’t consider any URL as the endpoint to a physical server file to parse and compile to a class. In ASP.NET, you have a 1:1 correspondence between a URL and a resource. The only exception to this rule is when you use completely custom HTTP handlers bound to a particular path.
In the MVC Framework, a URL is seen as the means to address a logical server resource, but not necessarily an ASPX file to parse. So the URLs employed by the pages of an MVC Framework application have a custom format that the application itself mandates. In the end, the MVC Framework employs a centralized HTTP handler that recognizes an application-specific syntax for links. In addition, each addressable resource exposes a well-known set of operations and a uniform interface for executing operations.
So here’s an alternate way of looking at the MVC Framework. It is an ASP.NET framework that performs data exchange by using a REST model versus the postback model of classic ASP.NET. Each page is split into two distinct components -controller and view - that operate over the same model of data. This is opposed to the classic code-behind model where no barrier is set that forces you to think in terms of separation of concerns and controllers and views. However, by keeping the code-behind class as thin as possible, and designing the business layer appropriately, a good developer could achieve separation of concerns even without adopting MVC and its overhead. MVC, however, is a model superior to a properly-done code-behind for its inherent support for test-driven development.
Visual Studio Orcas, due for release at the end of 2007, promises numerous improvements for Visual Basic, a data query called the Language Integrated Query (LINQ), a new Entity Framework for ADO.NET and updated tools for ASP.NET AJAX and Office 2007 development.
Multi-targeting Visual Studio Orcas is billed as the design surface for the .NET Framework 3.5, which itself is the merger of the .NET 3.0 toolset introduced earlier this year with updated versions of ASP.NET, ADO.NET, Visual Basic, C# and the CLR. At the same time, though, Orcas allows developers to work backwards and develop specifically for .NET 2.0 or 3.0. In the words of Jeff King, program manager on the Visual Studio team, the tool and the framework are decoupled: "It really buys you freedom." Once a framework version has been selected, Visual Studio Orcas will enable the reference features that are appropriate for that version of the framework. (In other words, don't try using LINQ in a .NET 2.0 application.)
N-tier application development An n-tier application is spread among any number of different systems, typically a service layer, an access layer and a business logic layer. With such a model, it is easy to share validation logic between entities, said Young Joo, a Visual Basic program manager. Unfortunately, developing such applications in Visual Studio 2005 is "pretty much impossible," Joo said, because a dataset and the code that connects it to a database are in the same file. The change in Visual Studio Orcas is subtle but important, as the table and the dataset now reside in different layers.
An improved designer King described the Visual Studio 2005 designer as little more the Internet Explorer renderer turned into an editor. To improve upon this, the Visual Studio group turned to Expression, the Microsoft product suite for Web designers. The new designer allows developers to apply styles in line, with an existing class, with a new class or with Internet Explorer. "We default manually nowadays," King said. In addition, changes such as margins and paddings around images can be applied to rules and not just individually. This also makes for cleaner CSS files. Finally, the designer offers a split view, so developers can look at source code and design simultaneously. This is a response to the growing trend of development using two monitors, King said.
ASP.NET AJAX and VSTO for Office 2007 Right now, developers aiming to build cutting edge Web applications have to download the ASP.NET AJAX framework, and those who want to develop for Office 2007 have to download Visual Studio 2005 Tools for Office Second Edition. Both ASP.NET AJAX and VSTO 2005 SE will be directly incorporated into Visual Studio Orcas. VSTO will come with a new runtime, which will run both Office 2007 and Office 2003 add-ins, while ASP.NET AJAX will come with a variety of JavaScript tools, such as IntelliSense and robust debugging.
The ADO.NET Entity Framework The biggest changes to ADO.NET revolve around its Entity Framework, which, unfortunately, is now slated to be released quite a while after Visual Studio 2008. This framework consists of a conceptual layer, which fits between an application's logical database layer and its object layer, and the Entity Data Model, said Julia Lerman, consultant and owner of The Data Farm. Run the Entity Data Model Wizard in Visual Studio Orcas and the output is three files -- a conceptual model that talks to object classes, a logical model that the relational database talks to, and map between the conceptual and logical models. Within the conceptual layer, one finds entity types bundled into sets, associations that define the relationship between entities, and sets of associations. The information in this layer will handle the back and forth with SQL Server without touching data access code, Lerman said. Once entities have been created, developers can use the either CreateQuery or new LINQ to Entities query to retrieve entity objects, data records or anonymous types, Lerman said.
LINQ: The Language Integrated Query In Visual Studio 2005, querying a dataset typically involves a stored procedure, a newly created view and a bit of ADO.NET filtering. This is the case because data exists in rows, while .NET code deals with objects. "They are always two different worlds," said Jeff King, a program manager on the Visual Studio team. "LINQ puts queries inside the languages and merges the worlds together." At its most basic level, the Language Integrated Query, a feature of both Visual Basic and C#, uses the concept of the SQL Select statement to make its queries. However, there are two important differences, said Markus Egger, president and chief software architect at EPS Software Corp. and publisher of CoDe Magazine. First, LINQ statements begin with a From statement instead of the Select statement. By listing the data source first, IntelliSense is triggered, Egger said. Second, since C# and Visual Basic are object-oriented languages, he said, "whatever you can express in C# or VB you can make part of LINQ queries." This encompasses anything that is IEnumerable -- entities, collections and even XML. Moreover, since the queries are being made in an object-oriented environment, Egger said, "you can do very, very complex things that result in a completely different result set," such as calling up an instance of objects that did not exist in the source at all. LINQ also brings about the introduction of several new language concepts for Visual Basic and C#. The expression tree, for example, is a data representation of the LINQ expression that bridges the gap between the .NET code and the SQL Server. In addition, property initialization makes it possible to create an object and set its properties in a single line of code. Other new language concepts, which will be discussed below, include implicit types and extension methods.
VB 9: Implicit types In a nutshell, author and consultant Billy Hollis said, implicit types provide strong typing without forcing developers to figure out the type they need. The compiler does the work for them, since the type is inferred from the initializer expression. Implicit types work well when looping through a collection, Hollis said, since in such a scenario a developer is likely to be looking only for a key and a value and will not know, or care, what the index type is. In addition, inferring types makes it possible for extensions to bind to data types such as XML. This is fundamental to making LINQ work, Hollis said.
VB 9: Extension Methods Extension methods, which Hollis described as "syntactic sugar," are another Visual Basic 9 feature coming straight from LINQ, since all LINQ query operators are extension methods. These methods are marked with custom attributes and are then added to other objects automatically (so long as they are not already there). Hollis said extension methods can be used just about anywhere a developer would use a normal function. However, they cannot contain optional parameters, parameter arrays or generic arguments that have not been typed. Also, late binding cannot be done with extension methods, he said.
VB 9: IntelliSense IntelliSense, already referred to as "Intellicrack" in some development circles, is set in Visual Basic 9 to encompass keywords, member variables and anything in scope. "Anything that would make sense there, IntelliSense will put it in," Hollis said. Moreover, IntelliSense will work with implicit types, once the compiler has figured out what is they type, Egger said. In addition, LINQ, as stated, is set up to take advantage of IntelliSense. In SQL syntax, the Select query comes first, but in LINQ, the From statement comes first. With the data source listed first, IntelliSense has a chance to kick in. VB 9: Relaxed delegates, initializers and more For additional information on what's new in Visual Basic 9, including relaxed delegates and initializers, check out the MSDN article Overview of Visual Basic 9.0. The emphasis there is on productivity gains developers can expect to enjoy when building data-oriented applications with an increasingly dynamic language.
With the first, public alpha version of the coming release of Visual Studio .NET—christened "Whidbey"—now in the hands of developers, it's time to start thinking about your applications and how they might be affected as you move to this new version. Although the move from version 1.0 to 1.1 of the .NET Framework was trivial and involved mostly bug fixes, performance enhancements, and the integration of previously separate technologies such as the ODBC and Oracle .NET Data Providers, version 2.0 changes the story for data access. It includes a host of new features, some of which may cause you to rethink how data is accessed and manipulated in your applications.
In this article, I'll give you a brief rundown of what I see as the most significant new features in ADO.NET and how you might take advantage of them in your implementations.
Providing a wider view of data Before delving into the specific features of ADO.NET v2.0, let me preface the discussion by noting that one of the overall design goals of this version was to allow a higher degree of interoperability between data accessed relationally, accessed as XML, and accessed as custom objects. Since these three make up the "ruling triumvirate" in representing data in an application, ADO.NET v2.0 was designed to make it easier for developers to use the appropriate model when desired within and across applications.
For example, in applications built using a service-oriented architecture (SOA), persistent business data will often be manipulated relationally; data that represents the process and encapsulates business rules will be manipulated as objects; and message and lookup data that must be serialized for transport will be handled as XML. To present the new features, I've factored them into two broad buckets: the new features that provide this wider view of data and the features that enhance or extend the relational paradigm.
Widening the .NET There are two primary new features you'll want to explore in the area of extending your ability to handle data. Let's take a look at each.
ObjectSpaces This technology was previewed several years ago at PDC and will now be released in Whidbey. Simply put, ObjectSpaces provides an object-relational mapping layer in the System.Data.ObjectSpaces namespace, which instantiates and populates custom objects from a relational database. This works through XML metadata stored in a mappings file that is passed to the constructor of the ObjectSpace class, which maps relational objects to .NET objects and relational types to .NET types.
The programming model supports queries (ObjectQuery) and maintains sets of objects in memory (ObjectSet), access to streams of objects (ObjectReader), and even lazy loading of objects to improve performance (ObjectList and ObjectHolder). Following is an example of how the programming model looks:
// Create the mappings ObjectSpace oa = new ObjectSpace(mappings-file, connection); // Query the data ObjectQuery oq = new ObjectQuery(Product, "category='Equipment'"); ObjectReader or = oa.GetObjectReader(oq); // Traverse the data while (or.Read()) { Product p = (Product)or.Current; Console.WriteLine(p.Name); } Although in the current release, ObjectSpaces works only with SQL Server 2000 and SQL Server "Yukon" (the release of SQL Server more or less synchronized with the release of Whidbey), this technology will be extended to access other relational stores in the future. ObjectSpaces is ideal when you want to represent your data using a domain model and encapsulate business logic as methods on your custom objects, since it will save you from writing the tiresome code needed to load from and persist your objects to a relational data store.
SQLXML and XmlAdapter Although the ADO.NET DataSet has always included the ability to load data as XML and serialize its contents as XML, the translation between the two ways of representing data always included some tension. For example, in order for the XML to load into a DataSet, its schema couldn't be overly complex, and it needed to map well into the relational DataTables of the DataSet.
Although DataSet support of XML has been enhanced in version 2 to allow loading of XML with multiple in-line schemas, loading schemas with repeated element names in different namespaces, and loading/serializing directly from DataTable objects, the data must still be relational in nature to work with the DataSet. To overcome this, version 2 includes the System.Xml.XmlAdapter class. This class is analogous to the DataAdapter classes in that it is a liaison between a data source and a representation of the data, but is used to query and load XML from an XML View into an XPathDocument object (called XPathDocument2 in the alpha; however, that will be renamed to XPathDocument before release).
XML Views allow relational tables (in SQL Server only) to be mapped to an XML schema via mappings files; they are the core component of the SQLXML 3.0 technology, once provided separately from the .NET Framework but now migrated into ADO.NET v2 (including the ability to bulk-load XML into SQL Server) in the System.Data.SqlXml namespace. Using this approach, you can provide a set of XML Views for your SQL Server data, query the data with the XQuery language using the new XQueryProcessor class and the Fill method of the XmlAdapter, and manipulate the data using the XPathDocument, XPathEditor, XPathNavigator, and XPathChangeNavigator classes.
The changes are written to SQL Server by calling the Update method of the XmlAdapter, which relies on the XML View to write the SQL statements to execute using a mappings file. The advantage of this approach is that you can treat your SQL Server data no differently than other XML data stores and can take advantage of the full fidelity of the XML when making changes. See a simple example of the programming model.
// Set up the connection and query SqlConnection con = new SqlConnection(connection-string); XQueryProcessor xq = new XQueryProcessor(); xq.XmlViewSchemaDictionary.Add("name", new XmlTextReader("mappings-file")); xq.Compile(…); // Set up the datasource XmlDataSourceResolver xd = new XmlDataSourceResolver(); xd.Add("MyDB", con); // Configure the XmlAdapter XmlAdapter xa = new XmlAdapter(xd); XPathDocument xp = new XPathDocument(); // Execute the query and populate the document xa.Fill(xp, xq.XmlCommand); // Navigate the document… XPathNavigator xn = xp.CreateXPathNavigator(); // Or edit the document and change the data XPathEditor xe = xp.CreateXPathEditor(); // Set the schema and update the database MappingSchema ms = new MappingSchema("mappings-file"); xa.Update(xp, ms);
Of course, XML Views simply provide the mapping of the data to and from SQL Server. If you're not using SQL Server, you can still take advantage of the substantial changes to XPathDocument (that will supersede and make obsolete the XmlDocument class) and its related classes to more easily query, navigate, and edit XML that you load from other sources.
For example, you can use a new XmlFactory class to create a related set of XmlReader, XmlWriter, and XPathNavigator classes for an XML document. These classes now support the ability to read and write .NET types to and from XML documents. And, of course, performance has improved for reading and writing with XmlTextReader and XmlTextWriter, and when using XSLT.
Extending the relational paradigm The second broad set of changes relates to those made in ADO.NET v2.0 to enhance relational database access. I've organized these into changes that all developers can take advantage of regardless of the underlying database you write to, and changes that will require SQL Server 2000 or the next version of SQL Server, Yukon.
Provider factory Although the design of .NET Data Providers is based on a common set of interfaces and base classes, in v1.0 and v1.1 Microsoft did not ship factory classes to help developers write polymorphic data access code. As a result, developers had to write such factory classes on their own.
In version 2, ADO.NET includes factory classes inherited from System.Data.Common.DbProviderFactory to create the standard connection, command, data reader, table, parameter, permissions, and data adapter classes; these help you write code that targets multiple databases. A factory is accessed using the GetFactory method of the DbProviderFactories class and can be configured in the application's configuration file using the DbProviderConfigurationHandler.
Asynchronous data access Commands executed by ADO.NET in version 1.0 using the ExecuteNonQuery, ExecuteReader, and ExecuteXmlReader methods of SqlCommand were synchronous and would block the current thread until the results were returned by the server. In v2.0, each of these methods includes both Begin and End versions to support asynchronous execution from the client's perspective.
This technique employs the familiar asynchronous programming model using the AsyncCallback delegate in .NET, and so includes the SqlAsyncResult class to implement the IAsyncResult interface. While this feature works only for SqlClient at the moment, look for it to perhaps be extended to other providers before the release. Following is an example of setting up an asynchronous command. (Note that the SqlAsyncResult class is not included in the alpha at this time, so the code will not execute.)
// Set up the connection and command SqlConnection con = new SqlConnection(connection-string); SqlCommand cm = new SqlCommand(SQL statement, con); con.Open(); cm.BeginExecuteNonQuery(new AsyncCallback(DoneExecuting), null); // Thread is free, do other things … // Callback method private void DoneExecuting(SqlAsyncResult ar) { int numRows = cm.EndExecuteNonQuery(ar); // print the number of rows affected } Batch updates In version 1.0, a DataAdapter always sent changes to rows one at a time to the server. In version 2.0, the DataAdapter exposes an UpdateBatchSize property that, if supported by the data provider, allows changed rows to be sent to the server in groups. This cuts down on the number of round-trips to the server and therefore increases performance.
Data paging In both SqlClient and OracleClient, the command object now exposes an ExecutePageReader method that allows you to pass in the starting row and the number of rows to return from the server. This allows for more efficient data access by retrieving only the rows you need to display. However, this feature reads the rows currently in the table, so subsequent calls may contain rows from the previous page because of inserts, or from the latter pages because of deletes. It therefore works best with relatively static data.
Binary DataSet remoting Version 2.0 now allows DataSets to be serialized using a binary format when employing .NET remoting. This both increases the performance of remoting data between .NET applications and reduces the number of bytes transferred.
DataSet and DataReader transfer In version 1.1, you could only load a DataSet from a DataAdapter. But in version 2.0, you can also load one directly using a DataReader and the Load method. Conversely, you can now generate a DataTableReader (inherited from DbDataReader) with the GetDataReader method in order to traverse the contents of a DataSet. This feature makes it easy to load a DataSet and view its data.
Climbing Yukon In this category are the new features of ADO.NET v2.0 that relate directly to the new release of the SQL Server code named Yukon, due out in the same time frame:
MARS Multiple active result sets (MARS) allows you to work with more than one concurrent result set on a single connection to Yukon. This can be efficient if you need to open a SqlDataReader and, during the traversal, execute a command against a particular row. MARS allows both commands to share the same SqlConnection object so that a second connection to SQL Server is not required.
Change notification One of the most interesting new features of Yukon is its ability to support notifications. ADO.NET v2.0 includes programmatic support for this feature by including a SqlNotificationRequest object that can be bound to a SqlCommand.
When data returned from the command changes in the database, a message is sent to the specified notification queue. ADO.NET code can then query the queue either by using an asynchronous query that blocks until a message is sent or by periodically checking the queue using new Transact-SQL syntax.
To make this feature even easier to work with, a SqlDependency class that sets up an asynchronous delegate is included. This will be called when the data changes, and it can be used like other dependencies in conjunction with the ASP.NET caching engine. An example of using a SqlDependency object-
// Set up the connection and command SqlConnection con = new SqlConnection(connection-string); SqlCommand cm = new SqlCommand(SELECT statement, con); SqlDependency dep = new SqlDependency(cm); dep.OnChanged += new OnChangedEventHandler (HandleChange); SqlDataReader dr = cm.ExecuteReader(); // Process the data … private void HandleChange (object sender, SqlNotificationEventArgs e) { // A change has been made to the data // Inspect the type of change using e.Type } Yukon types ADO.NET v2.0 supports the full set of Yukon data types, including XML and User Defined Types (UDTs). This means that columns in Yukon defined as XML can be retrieved as XmlReader objects, and that UDTs can be passed to stored procedures and returned from queries as standard .NET types. This allows your applications to work with data as fully formed objects while interacting with the database using the objects. This feature can be used profitably when writing managed code that runs in-process in SQL Server, allowing both the managed stored procedure and the client code to use the same .NET type.
Server-side cursors Because it often caused applications to perform poorly, ADO.NET v1.0 and v1.1 did away with the server-side cursors that had been supported in ADO v2.x. ADO.NET v2.0 now reintroduces the concept in Yukon using the ExecuteResultset and ExecuteRow methods of the SqlCommand object and the SqlResultset class.
The SqlResultset class offers a fully scrollable and updateable cursor that can be useful for applications that need to traverse a large amount of data and update only a few rows. Although this feature can be used from client applications such as ASP.NET, it is mainly intended for use when writing managed code that runs in-process with Yukon in the form of stored procedures.
Bulk copy Although not restricted to Yukon, ADO.NET v2.0 now allows programmatic access to the BCP or bulk copy API exposed by SQL Server. This is done using the SqlBulkCopyOperation and SqlBulkCopyColumnAssociator classes in the System.Data.SqlClient namespace.
|